Convert IA64 to the page_list_head / page_list_entry API (page_list_add_tail, page_list_del, page_list_remove_head, page_list_for_each_safe, ...) for consistency with the x86 code.
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
memset(&d->arch.mm, 0, sizeof(d->arch.mm));
d->arch.relres = RELRES_not_started;
d->arch.mm_teardown_offset = 0;
- INIT_LIST_HEAD(&d->arch.relmem_list);
+ INIT_PAGE_LIST_HEAD(&d->arch.relmem_list);
if ((d->arch.mm.pgd = pgd_alloc(&d->arch.mm)) == NULL)
goto fail_nomem;
return rc;
}
-static int relinquish_memory(struct domain *d, struct list_head *list)
+static int relinquish_memory(struct domain *d, struct page_list_head *list)
{
- struct list_head *ent;
struct page_info *page;
#ifndef __ia64__
unsigned long x, y;
/* Use a recursive lock, as we may enter 'free_domheap_page'. */
spin_lock_recursive(&d->page_alloc_lock);
- ent = list->next;
- while ( ent != list )
+
+ while ( (page = page_list_remove_head(list)) )
{
- page = list_entry(ent, struct page_info, list);
/* Grab a reference to the page so it won't disappear from under us. */
if ( unlikely(!get_page(page, d)) )
{
/* Couldn't get a reference -- someone is freeing this page. */
- ent = ent->next;
- list_move_tail(&page->list, &d->arch.relmem_list);
+ page_list_add_tail(page, &d->arch.relmem_list);
continue;
}
#endif
/* Follow the list chain and /then/ potentially free the page. */
- ent = ent->next;
BUG_ON(get_gpfn_from_mfn(page_to_mfn(page)) != INVALID_M2P_ENTRY);
- list_move_tail(&page->list, &d->arch.relmem_list);
+ page_list_add_tail(page, &d->arch.relmem_list);
put_page(page);
if (hypercall_preempt_check()) {
}
}
- list_splice_init(&d->arch.relmem_list, list);
+ /* list is empty at this point. */
+ if ( !page_list_empty(&d->arch.relmem_list) )
+ {
+ *list = d->arch.relmem_list;
+ INIT_PAGE_LIST_HEAD(&d->arch.relmem_list);
+ }
+
out:
spin_unlock_recursive(&d->page_alloc_lock);
page->count_info |= PGC_allocated | 1;
if ( unlikely(d->xenheap_pages++ == 0) )
get_knownalive_domain(d);
- list_add_tail(&page->list, &d->xenpage_list);
+ page_list_add_tail(page, &d->xenpage_list);
}
// grant_table_destroy() releases these pages.
/* Unlink from original owner. */
if ( !(memflags & MEMF_no_refcount) )
d->tot_pages--;
- list_del(&page->list);
+ page_list_del(page, &d->page_list);
spin_unlock(&d->page_alloc_lock);
perfc_incr(steal_page);
return -ENOMEM;
}
- list_add(&entry_page->list, &tlb_track->page_list);
+ page_list_add(entry_page, &tlb_track->page_list);
track_entries = (struct tlb_track_entry*)page_to_virt(entry_page);
allocated = PAGE_SIZE / sizeof(track_entries[0]);
tlb_track->num_entries += allocated;
tlb_track->limit = TLB_TRACK_LIMIT_ENTRIES;
tlb_track->num_entries = 0;
tlb_track->num_free = 0;
- INIT_LIST_HEAD(&tlb_track->page_list);
+ INIT_PAGE_LIST_HEAD(&tlb_track->page_list);
if (tlb_track_allocate_entries(tlb_track) < 0)
goto out;
spin_lock(&tlb_track->free_list_lock);
BUG_ON(tlb_track->num_free != tlb_track->num_entries);
- list_for_each_entry_safe(page, next, &tlb_track->page_list, list) {
- list_del(&page->list);
+ page_list_for_each_safe(page, next, &tlb_track->page_list) {
+ page_list_del(page, &tlb_track->page_list);
free_domheap_page(page);
}
#include <asm/vmx_platform.h>
#include <xen/list.h>
#include <xen/cpumask.h>
+#include <xen/mm.h>
#include <asm/fpswa.h>
#include <xen/rangeset.h>
/* Continuable mm_teardown() */
unsigned long mm_teardown_offset;
/* Continuable domain_relinquish_resources() */
- struct list_head relmem_list;
+ struct page_list_head relmem_list;
};
#define INT_ENABLE_OFFSET(v) \
(sizeof(vcpu_info_t) * (v)->vcpu_id + \
#define PRtype_info "016lx"
+#if 0
+/*
+ * See include/xen/mm.h.
+ * For now, abandon to compress struct page_info
+ * seeing IA64_MAX_PHYS_BITS and page size.
+ */
+#undef page_list_entry
+struct page_list_entry
+{
+ unsigned long next, prev;
+};
+#endif
+
struct page_info
{
/* Each frame can be threaded onto a doubly-linked list. */
- struct list_head list;
+ struct page_list_entry list;
/* Reference count and various PGC_xxx flags and fields. */
unsigned long count_info;
unsigned int limit;
unsigned int num_entries;
unsigned int num_free;
- struct list_head page_list;
+ struct page_list_head page_list;
/* XXX hash table size */
spinlock_t hash_lock;